From: Keir Fraser Date: Wed, 14 Apr 2010 09:44:29 +0000 (+0100) Subject: Per-cpu tasklet lists. X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~12377 X-Git-Url: https://dgit.raspbian.org/%22http://www.example.com/cgi/success//%22http:/www.example.com/cgi/success/?a=commitdiff_plain;h=58f293ed52f73282e8e71db854801a06aa0a168e;p=xen.git Per-cpu tasklet lists. Signed-off-by: Juergen Gross Signed-off-by: Keir Fraser --- diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c index 00c4300231..5f8a96100b 100644 --- a/xen/arch/x86/smpboot.c +++ b/xen/arch/x86/smpboot.c @@ -1374,6 +1374,7 @@ int cpu_down(unsigned int cpu) BUG_ON(cpu_online(cpu)); + migrate_tasklets_from_cpu(cpu); cpu_mcheck_distribute_cmci(); out: diff --git a/xen/common/softirq.c b/xen/common/softirq.c index 7b04f36f2d..f99d0d4f7c 100644 --- a/xen/common/softirq.c +++ b/xen/common/softirq.c @@ -78,7 +78,8 @@ void cpumask_raise_softirq(cpumask_t mask, unsigned int nr) void cpu_raise_softirq(unsigned int cpu, unsigned int nr) { - if ( !test_and_set_bit(nr, &softirq_pending(cpu)) ) + if ( !test_and_set_bit(nr, &softirq_pending(cpu)) + && (cpu != smp_processor_id()) ) smp_send_event_check_cpu(cpu); } @@ -87,46 +88,54 @@ void raise_softirq(unsigned int nr) set_bit(nr, &softirq_pending(smp_processor_id())); } -static LIST_HEAD(tasklet_list); +static bool_t tasklets_initialised; +static DEFINE_PER_CPU(struct list_head, tasklet_list); static DEFINE_SPINLOCK(tasklet_lock); -void tasklet_schedule(struct tasklet *t) +void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu) { unsigned long flags; spin_lock_irqsave(&tasklet_lock, flags); - if ( !t->is_dead ) + if ( tasklets_initialised && !t->is_dead ) { - if ( !t->is_scheduled && !t->is_running ) + t->scheduled_on = cpu; + if ( !t->is_running ) { - BUG_ON(!list_empty(&t->list)); - list_add_tail(&t->list, &tasklet_list); + list_del(&t->list); + list_add_tail(&t->list, &per_cpu(tasklet_list, cpu)); + cpu_raise_softirq(cpu, TASKLET_SOFTIRQ); } - 
t->is_scheduled = 1; - raise_softirq(TASKLET_SOFTIRQ); } spin_unlock_irqrestore(&tasklet_lock, flags); } +void tasklet_schedule(struct tasklet *t) +{ + tasklet_schedule_on_cpu(t, smp_processor_id()); +} + static void tasklet_action(void) { + unsigned int cpu = smp_processor_id(); + struct list_head *list = &per_cpu(tasklet_list, cpu); struct tasklet *t; spin_lock_irq(&tasklet_lock); - if ( list_empty(&tasklet_list) ) + if ( list_empty(list) ) { spin_unlock_irq(&tasklet_lock); return; } - t = list_entry(tasklet_list.next, struct tasklet, list); + t = list_entry(list->next, struct tasklet, list); list_del_init(&t->list); - BUG_ON(t->is_dead || t->is_running || !t->is_scheduled); - t->is_scheduled = 0; + BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu)); + t->scheduled_on = -1; t->is_running = 1; spin_unlock_irq(&tasklet_lock); @@ -135,17 +144,19 @@ static void tasklet_action(void) t->is_running = 0; - if ( t->is_scheduled ) + if ( t->scheduled_on >= 0 ) { BUG_ON(t->is_dead || !list_empty(&t->list)); - list_add_tail(&t->list, &tasklet_list); + list_add_tail(&t->list, &per_cpu(tasklet_list, t->scheduled_on)); + if ( t->scheduled_on != cpu ) + cpu_raise_softirq(t->scheduled_on, TASKLET_SOFTIRQ); } /* * If there is more work to do then reschedule. We don't grab more work * immediately as we want to allow other softirq work to happen first. 
*/ - if ( !list_empty(&tasklet_list) ) + if ( !list_empty(list) ) raise_softirq(TASKLET_SOFTIRQ); spin_unlock_irq(&tasklet_lock); @@ -159,10 +170,10 @@ void tasklet_kill(struct tasklet *t) if ( !list_empty(&t->list) ) { - BUG_ON(t->is_dead || t->is_running || !t->is_scheduled); + BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0)); list_del_init(&t->list); } - t->is_scheduled = 0; + t->scheduled_on = -1; t->is_dead = 1; while ( t->is_running ) @@ -175,18 +186,48 @@ void tasklet_kill(struct tasklet *t) spin_unlock_irqrestore(&tasklet_lock, flags); } +void migrate_tasklets_from_cpu(unsigned int cpu) +{ + struct list_head *list = &per_cpu(tasklet_list, cpu); + unsigned long flags; + struct tasklet *t; + + spin_lock_irqsave(&tasklet_lock, flags); + + while ( !list_empty(list) ) + { + t = list_entry(list->next, struct tasklet, list); + BUG_ON(t->scheduled_on != cpu); + t->scheduled_on = smp_processor_id(); + list_del(&t->list); + list_add_tail(&t->list, &this_cpu(tasklet_list)); + } + + raise_softirq(TASKLET_SOFTIRQ); + + spin_unlock_irqrestore(&tasklet_lock, flags); +} + void tasklet_init( struct tasklet *t, void (*func)(unsigned long), unsigned long data) { memset(t, 0, sizeof(*t)); INIT_LIST_HEAD(&t->list); + t->scheduled_on = -1; t->func = func; t->data = data; } void __init softirq_init(void) { + unsigned int cpu; + + for_each_possible_cpu ( cpu ) + INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu)); + open_softirq(TASKLET_SOFTIRQ, tasklet_action); + + tasklets_initialised = 1; } /* diff --git a/xen/include/xen/softirq.h b/xen/include/xen/softirq.h index b02c8e2509..0e3c4ea5f2 100644 --- a/xen/include/xen/softirq.h +++ b/xen/include/xen/softirq.h @@ -47,7 +47,7 @@ void process_pending_softirqs(void); struct tasklet { struct list_head list; - bool_t is_scheduled; + int scheduled_on; bool_t is_running; bool_t is_dead; void (*func)(unsigned long); @@ -55,10 +55,12 @@ struct tasklet }; #define DECLARE_TASKLET(name, func, data) \ - struct tasklet name = { 
LIST_HEAD_INIT(name.list), 0, 0, 0, func, data } + struct tasklet name = { LIST_HEAD_INIT(name.list), -1, 0, 0, func, data } +void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu); void tasklet_schedule(struct tasklet *t); void tasklet_kill(struct tasklet *t); +void migrate_tasklets_from_cpu(unsigned int cpu); void tasklet_init( struct tasklet *t, void (*func)(unsigned long), unsigned long data);